# Load libraries, install if needed
library(tidyverse)
#> Warning: package 'tidyr' was built under R version 4.0.5
library(tidylog)
library(RCurl)
library(RColorBrewer)
#> Warning: package 'RColorBrewer' was built under R version 4.0.5
library(sdmTMB) # install.packages("sdmTMB", dependencies = TRUE) should get you INLA as well, else do INLA manually first:
library(INLA) # install.packages("INLA",repos=c(getOption("repos"),INLA="https://inla.r-inla-download.org/R/stable"), dep=TRUE)
#> Warning: package 'INLA' was built under R version 4.0.4
library(devtools)
library(patchwork)
library(sf)
#> Warning: package 'sf' was built under R version 4.0.5
library(ggeffects)
library(visreg)
library(boot)
# Source helper functions from the project repository:
# - lon-lat-utm.R: lon/lat <-> UTM conversion helpers
# - map-plot.R: shared map/plot helpers (presumably provides theme_plot()
#   used in the plotting code below — confirm)
# NOTE(review): sourcing from `main` is not reproducible; consider pinning
# these URLs to a specific commit hash.
source_url("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/R/functions/lon-lat-utm.R")
source_url("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/R/functions/map-plot.R")
Filter to the litter categories that have enough data to work with. Rarely occurring categories such as glass and metal are hard to fit models to, since some years contain no observations at all; pooling years could be considered for those. See the exploratory model-fitting script (doesn't exist yet).
# Read and make data long so that I can for loop through all categories
# West coast litter data, pivoted to long format so the model loop below can
# iterate over litter categories. year/quarter get factor versions for use as
# categorical fixed effects; depth is z-scored within this dataset.
west <-
  readr::read_csv("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/data/west_coast_litter.csv") %>%
  mutate(
    year_f = as.factor(year),
    quarter_f = as.factor(quarter),
    depth_sc = (depth - mean(depth)) / sd(depth)
  ) %>%
  pivot_longer(
    c(fishery_a, fishery_b, plast_a, plast_b, sup_a, sup_b, tot_a, tot_b),
    names_to = "litter_category",
    values_to = "density"
  )
#> Rows: 559 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (4): haul_id, ansse_area, trend_area, var
#> dbl (18): plast_a, sup_a, fishery_a, tot_a, plast_b, quarter, st_no, haul_no...
#> lgl (1): balse_area
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#>
#> new variable 'depth_sc' (double) with 350 unique values and 0% NA
#>
#> pivot_longer: reorganized (plast_a, sup_a, fishery_a, tot_a, plast_b, …) into (litter_category, density) [was 559x26, now 4472x20]
# East coast litter data; same preparation as the west coast set above:
# factor year/quarter for the model fixed effects, z-scored depth, and a
# long pivot over the litter-category columns.
east <-
  readr::read_csv("https://raw.githubusercontent.com/maxlindmark/marine-litter/main/data/east_coast_litter.csv") %>%
  mutate(
    year_f = as.factor(year),
    quarter_f = as.factor(quarter),
    depth_sc = (depth - mean(depth)) / sd(depth)
  ) %>%
  pivot_longer(
    c(fishery_a, fishery_b, plast_a, plast_b, sup_a, sup_b, tot_a, tot_b),
    names_to = "litter_category",
    values_to = "density"
  )
#> Rows: 546 Columns: 23
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (4): haul_id, balse_area, trend_area, var
#> dbl (18): plast_a, sup_a, fishery_a, tot_a, plast_b, quarter, st_no, haul_no...
#> lgl (1): ansse_area
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with 2 unique values and 0% NA
#>
#> new variable 'depth_sc' (double) with 417 unique values and 0% NA
#>
#> pivot_longer: reorganized (plast_a, sup_a, fishery_a, tot_a, plast_b, …) into (litter_category, density) [was 546x26, now 4368x20]
# Load pred grids
# Prediction grid for the west coast. depth_sc is standardised with the mean
# and sd of the *observed* west-coast data (west$depth), so the covariate is
# on the same scale the model was fitted on. X/Y are multiplied by 1000
# (presumably km -> m, to match the survey coordinate units — confirm), and
# grid cells with no defined area are dropped.
pred_grid_west <- read_csv("data/pred_grid_west.csv") %>%
  mutate(
    year_f = as.factor(year),
    quarter_f = as.factor(1),
    depth_sc = (depth - mean(west$depth)) / sd(west$depth),
    X = X * 1000,
    Y = Y * 1000
  ) %>%
  drop_na(area)
#> Rows: 37512 Columns: 7
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (1): area
#> dbl (6): X, Y, year, lon, lat, depth
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with one unique value and 0% NA
#>
#> new variable 'depth_sc' (double) with 2,732 unique values and 0% NA
#>
#> mutate: changed 37,512 values (100%) of 'X' (0 new NA)
#>
#> changed 37,512 values (100%) of 'Y' (0 new NA)
#>
#> drop_na: removed 15,030 rows (40%), 22,482 rows remaining
# ggplot(pred_grid_west, aes(X*1000, Y*1000, color = area)) +
# geom_point()
# Prediction grid for the east coast. As for the west grid, depth_sc uses the
# mean/sd of the *observed* east data, and X/Y are multiplied by 1000
# (presumably km -> m — confirm).
# NOTE(review): unlike the west grid there is no drop_na(area) here —
# presumably the east grid has no cells with missing area; verify.
pred_grid_east <- read_csv("data/pred_grid_east.csv") %>%
  mutate(
    year_f = as.factor(year),
    quarter_f = as.factor(1),
    depth_sc = (depth - mean(east$depth)) / sd(east$depth),
    X = X * 1000,
    Y = Y * 1000
  )
#> Rows: 121410 Columns: 7
#> ── Column specification ────────────────────────────────────────────────────────
#> Delimiter: ","
#> chr (1): area
#> dbl (6): X, Y, year, lon, lat, depth
#>
#> ℹ Use `spec()` to retrieve the full column specification for this data.
#> ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
#> mutate: new variable 'year_f' (factor) with 9 unique values and 0% NA
#>
#> new variable 'quarter_f' (factor) with one unique value and 0% NA
#>
#> new variable 'depth_sc' (double) with 6,366 unique values and 0% NA
#>
#> mutate: changed 121,410 values (100%) of 'X' (0 new NA)
#>
#> changed 121,410 values (100%) of 'Y' (0 new NA)
For plastic, SUP and fishery litter I have enough data to fit spatial models of both biomass density (Tweedie) and numbers (Poisson). For the other categories I will only fit presence/absence models, because that is close to what the data amount to anyway.
# https://haakonbakkagit.github.io/btopic104.html
# https://haakonbakkagit.github.io/btopic114.html
# max.edge <- mean(c(diff(range(num$X)), diff(range(num$Y)))) / 15
# cutoff <- max.edge/5
# Build the SPDE mesh for the west coast. cutoff = 4 sets the minimum
# allowed distance between mesh vertices (in the units of X/Y). After the
# pivot_longer above every litter category shares the same haul locations,
# so building the mesh from one category ("fishery_a") suffices and the
# mesh can be reused for all west-coast models below.
west_mesh <- make_mesh(west %>% filter(litter_category == "fishery_a"), c("X", "Y"), cutoff = 4)
#> filter: removed 3,913 rows (88%), 559 rows remaining
#mesh <- make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), type = "kmeans", n_knots = 50)
plot(west_mesh)
# Same mesh construction for the east coast: one category's rows carry the
# full set of haul locations, cutoff = 4 controls mesh resolution.
east_mesh <- make_mesh(east %>% filter(litter_category == "fishery_a"), c("X", "Y"), cutoff = 4)
#> filter: removed 3,822 rows (88%), 546 rows remaining
#mesh <- make_mesh(num %>% filter(litter_category == "sup"), c("X", "Y"), type = "kmeans", n_knots = 50)
plot(east_mesh)
# Fit one Tweedie spatial model per east-coast litter category and collect:
# coefficients, grid predictions, and simulation-based abundance indices
# (both summarised and with all draws retained).
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()

for (i in unique(east$litter_category)) {
  dd <- east %>%
    filter(litter_category == i) %>%
    droplevels()

  # Spatial-only model (spatiotemporal field off); 0 + year_f gives one
  # mean per year, plus a linear effect of standardised depth.
  m <- sdmTMB(
    data = dd,
    formula = density ~ 0 + year_f + depth_sc,
    mesh = east_mesh,
    family = tweedie(),
    spatial = "on",
    time = "year",
    spatiotemporal = "off"
  )

  sanity(m) # prints convergence / gradient / SE diagnostics

  # Was: an extra `tidy(m, conf.int = TRUE)` whose result was discarded and
  # immediately recomputed — removed as dead code.
  data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
    mutate(model = paste("east", i, sep = "_"))

  # Residual QQ-plot from MLE-MCMC residuals (201 iters, 200 warmup =
  # a single posterior sample per observation)
  mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  qqline(mcmc_res)

  # Predict on the full east-coast grid
  pred <- predict(m, newdata = pred_grid_east) %>%
    mutate(model = i)
  data_list_pred[[i]] <- pred

  # Simulation draws for index uncertainty
  nsim <- 500
  sim <- predict(m, newdata = pred_grid_east, nsim = nsim)

  # Index with per-cell area 2*2 (grid-resolution units); once summarised,
  # once returning all sims
  index_sim <- get_index_sims(sim,
    area = rep(2 * 2, nrow(sim))) %>%
    mutate(model = paste("east", i, sep = "_"))
  data_list_sim[[i]] <- index_sim

  index_sim_full <- get_index_sims(sim,
    area = rep(2 * 2, nrow(sim)),
    return_sims = TRUE) %>%
    mutate(model = paste("east", i, sep = "_"))
  data_list_sims[[i]] <- index_sim_full

  # Sanity check: with area weights summing to 1 per year, the index is a
  # mean density that should track the raw yearly mean in the data.
  ncells <- filter(pred_grid_east, year == max(pred_grid_east$year)) %>% nrow()
  index_sim_avg <- get_index_sims(sim, area = rep(1 / ncells, nrow(sim)))

  print(ggplot(index_sim_avg, aes(year, y = est, ymin = lwr, ymax = upr)) +
    geom_line() +
    geom_line(data = dd %>%
                group_by(year) %>%
                summarise(mean_density = mean(density)),
              aes(year, mean_density, color = "Data (mean)"), linetype = 2,
              inherit.aes = FALSE) + # Add data
    geom_ribbon(alpha = 0.2) +
    scale_color_brewer(palette = "Set1", name = "") +
    scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
    theme_plot() +
    xlab('Year') +
    ylab('Mean density') +
    NULL)

  # paste0 instead of paste(..., sep = "")
  ggsave(paste0("figures/supp/east_mean_pred_comp_", i, ".png"), dpi = 300)
}
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000381 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.81 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.647379 seconds (Warm-up)
#> Chain 1: 0.002592 seconds (Sampling)
#> Chain 1: 0.649971 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#>
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.00034 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.4 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.84795 seconds (Warm-up)
#> Chain 1: 0.00274 seconds (Sampling)
#> Chain 1: 0.85069 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000216 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.16 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.763082 seconds (Warm-up)
#> Chain 1: 0.002856 seconds (Sampling)
#> Chain 1: 0.765938 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.00023 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.3 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.766496 seconds (Warm-up)
#> Chain 1: 0.003127 seconds (Sampling)
#> Chain 1: 0.769623 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `ln_kappa` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✖ `ln_phi` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.00021 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.1 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.904379 seconds (Warm-up)
#> Chain 1: 0.003195 seconds (Sampling)
#> Chain 1: 0.907574 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000221 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.21 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.747641 seconds (Warm-up)
#> Chain 1: 0.003535 seconds (Sampling)
#> Chain 1: 0.751176 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `ln_kappa` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000242 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.42 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.873608 seconds (Warm-up)
#> Chain 1: 0.002941 seconds (Sampling)
#> Chain 1: 0.876549 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `thetaf` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000207 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.07 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.886776 seconds (Warm-up)
#> Chain 1: 0.003197 seconds (Sampling)
#> Chain 1: 0.889973 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 107,920 rows (89%), 13,490 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
# Collapse the per-category result lists into single data frames and write
# each to disk for downstream plotting/analysis.
dat_coef <- dplyr::bind_rows(data_list_coef)
dat_pred <- dplyr::bind_rows(data_list_pred)
dat_sim <- dplyr::bind_rows(data_list_sim)
dat_sims <- dplyr::bind_rows(data_list_sims)

east_outputs <- list(
  "output/east_dat_coef.csv" = dat_coef,
  "output/east_dat_pred.csv" = dat_pred,
  "output/east_dat_sim.csv" = dat_sim,
  "output/east_dat_sims.csv" = dat_sims
)
for (path in names(east_outputs)) {
  write_csv(east_outputs[[path]], path)
}
# Fit one Tweedie spatial model per west-coast litter category; same
# structure as the east-coast loop: coefficients, grid predictions, and
# simulation-based indices.
data_list_coef <- list()
data_list_pred <- list()
data_list_sim <- list()
data_list_sims <- list()

for (i in unique(west$litter_category)) {
  dd <- west %>%
    filter(litter_category == i) %>%
    droplevels()

  # Spatial-only model (spatiotemporal field off); 0 + year_f gives one
  # mean per year, plus a linear effect of standardised depth.
  m <- sdmTMB(
    data = dd,
    formula = density ~ 0 + year_f + depth_sc,
    mesh = west_mesh,
    family = tweedie(),
    spatial = "on",
    time = "year",
    spatiotemporal = "off"
  )

  sanity(m) # prints convergence / gradient / SE diagnostics

  # Was: an extra `tidy(m, conf.int = TRUE)` whose result was discarded and
  # immediately recomputed — removed as dead code.
  data_list_coef[[i]] <- tidy(m, conf.int = TRUE) %>%
    mutate(model = paste("west", i, sep = "_"))

  # Residual QQ-plot from MLE-MCMC residuals (201 iters, 200 warmup =
  # a single posterior sample per observation)
  mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  qqline(mcmc_res)

  # Predict on the full west-coast grid
  pred <- predict(m, newdata = pred_grid_west) %>%
    mutate(model = i)
  data_list_pred[[i]] <- pred

  # Simulation draws for index uncertainty
  nsim <- 500
  sim <- predict(m, newdata = pred_grid_west, nsim = nsim)

  # Index with per-cell area 2*2 (grid-resolution units); once summarised,
  # once returning all sims
  index_sim <- get_index_sims(sim,
    area = rep(2 * 2, nrow(sim))) %>%
    mutate(model = paste("west", i, sep = "_"))
  data_list_sim[[i]] <- index_sim

  index_sim_full <- get_index_sims(sim,
    area = rep(2 * 2, nrow(sim)),
    return_sims = TRUE) %>%
    mutate(model = paste("west", i, sep = "_"))
  data_list_sims[[i]] <- index_sim_full

  # Sanity check: with area weights summing to 1 per year, the index is a
  # mean density that should track the raw yearly mean in the data.
  ncells <- filter(pred_grid_west, year == max(pred_grid_west$year)) %>% nrow()
  index_sim_avg <- get_index_sims(sim, area = rep(1 / ncells, nrow(sim)))

  print(ggplot(index_sim_avg, aes(year, y = est, ymin = lwr, ymax = upr)) +
    geom_line() +
    geom_line(data = dd %>%
                group_by(year) %>%
                summarise(mean_density = mean(density)),
              aes(year, mean_density, color = "Data (mean)"), linetype = 2,
              inherit.aes = FALSE) + # Add data
    # Ribbon restored for consistency with the east-coast loop — aes()
    # already supplies ymin/ymax, which were otherwise unused here.
    geom_ribbon(alpha = 0.2) +
    scale_color_brewer(palette = "Set1", name = "") +
    scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
    theme_plot() +
    xlab('Year') +
    ylab('Mean density') +
    NULL)

  # paste0 instead of paste(..., sep = "")
  ggsave(paste0("figures/supp/west_mean_pred_comp_", i, ".png"), dpi = 300)
}
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000324 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.24 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.524486 seconds (Warm-up)
#> Chain 1: 0.002054 seconds (Sampling)
#> Chain 1: 0.52654 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#>
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.
#> group_by: one grouping variable (year)
#>
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000142 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.42 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.506556 seconds (Warm-up)
#> Chain 1: 0.002036 seconds (Sampling)
#> Chain 1: 0.508592 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `ln_phi` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000147 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.47 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.456711 seconds (Warm-up)
#> Chain 1: 0.002227 seconds (Sampling)
#> Chain 1: 0.458938 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000146 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.46 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.517663 seconds (Warm-up)
#> Chain 1: 0.00242 seconds (Sampling)
#> Chain 1: 0.520083 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.00032 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 3.2 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.436401 seconds (Warm-up)
#> Chain 1: 0.001921 seconds (Sampling)
#> Chain 1: 0.438322 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000165 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.65 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.482356 seconds (Warm-up)
#> Chain 1: 0.001862 seconds (Sampling)
#> Chain 1: 0.484218 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000168 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.68 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.485545 seconds (Warm-up)
#> Chain 1: 0.002212 seconds (Sampling)
#> Chain 1: 0.487757 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#>
#> SAMPLING FOR MODEL 'tmb_generic' NOW (CHAIN 1).
#> Chain 1:
#> Chain 1: Gradient evaluation took 0.000149 seconds
#> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.49 seconds.
#> Chain 1: Adjust your expectations accordingly!
#> Chain 1:
#> Chain 1:
#> Chain 1: Iteration: 1 / 201 [ 0%] (Warmup)
#> Chain 1: Iteration: 20 / 201 [ 9%] (Warmup)
#> Chain 1: Iteration: 40 / 201 [ 19%] (Warmup)
#> Chain 1: Iteration: 60 / 201 [ 29%] (Warmup)
#> Chain 1: Iteration: 80 / 201 [ 39%] (Warmup)
#> Chain 1: Iteration: 100 / 201 [ 49%] (Warmup)
#> Chain 1: Iteration: 120 / 201 [ 59%] (Warmup)
#> Chain 1: Iteration: 140 / 201 [ 69%] (Warmup)
#> Chain 1: Iteration: 160 / 201 [ 79%] (Warmup)
#> Chain 1: Iteration: 180 / 201 [ 89%] (Warmup)
#> Chain 1: Iteration: 200 / 201 [ 99%] (Warmup)
#> Chain 1: Iteration: 201 / 201 [100%] (Sampling)
#> Chain 1:
#> Chain 1: Elapsed Time: 0.618031 seconds (Warm-up)
#> Chain 1: 0.002315 seconds (Sampling)
#> Chain 1: 0.620346 seconds (Total)
#> Chain 1:
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 19,984 rows (89%), 2,498 rows remaining
#> We generally recommend using `get_index(..., bias_correct = TRUE)`
#> rather than `get_index_sims()`.group_by: one grouping variable (year)
#> summarise: now 9 rows and 2 columns, ungrouped
#> Saving 12 x 7.42 in image
# Collapse the per-category result lists into single data frames and
# write each one to disk as a CSV (west coast results).
dat_coef <- dplyr::bind_rows(data_list_coef)
dat_pred <- dplyr::bind_rows(data_list_pred)
dat_sim <- dplyr::bind_rows(data_list_sim)
dat_sims <- dplyr::bind_rows(data_list_sims)

# Write all four data frames with one call per pair (data, path)
purrr::walk2(
  list(dat_coef, dat_pred, dat_sim, dat_sims),
  c("output/west_dat_coef.csv", "output/west_dat_pred.csv",
    "output/west_dat_sim.csv", "output/west_dat_sims.csv"),
  write_csv
)
# Fit east-coast trend models, one litter category at a time.
# Two Tweedie sdmTMB models with a static spatial field are fit per category:
#   m1: density ~ linear year trend + scaled depth (whole period)
#   m2: density ~ year trend * time period + scaled depth, where the time
#       period splits the data at 2016 (separate slopes before/after)
# Fixed-effect predictions for 2013-2021 are collected in data_list_pred and
# the significance of the slope-change (interaction) term in data_list_coef.
data_list_pred <- list()
data_list_coef <- list()

for (i in unique(east$litter_category)) {
  dd <- east %>%
    filter(litter_category == i) %>%
    mutate(time_period = ifelse(year < 2016, "1", "2"),
           time_period = as.factor(time_period),
           year_ct = year - 2013) %>% # center year so the intercept is at 2013
    droplevels()

  # Whole time period: a single slope
  m1 <- sdmTMB(
    data = dd,
    formula = density ~ year_ct + depth_sc,
    mesh = east_mesh,
    family = tweedie(),
    spatial = "on",
    spatiotemporal = "off"
  )

  # Interaction: the slope is allowed to change in the second time period
  m2 <- sdmTMB(
    data = dd,
    formula = density ~ year_ct * time_period + depth_sc,
    mesh = east_mesh,
    family = tweedie(),
    spatial = "on",
    spatiotemporal = "off"
  )

  # Print convergence diagnostics and coefficient tables for the record
  sanity(m1)
  sanity(m2)
  tidy(m1)
  tidy(m2)
  temp_m1 <- tidy(m1, conf.int = TRUE)
  temp_m2 <- tidy(m2, conf.int = TRUE)

  # Look coefficients up by term name rather than row position; positional
  # indexing (conf.low[2], conf.low[5]) silently breaks if term order changes.
  slope_m1 <- filter(temp_m1, term == "year_ct")
  slope_m2 <- filter(temp_m2, term == "year_ct")
  int_m2 <- filter(temp_m2, term == "year_ct:time_period2")

  # Prediction grid: one row per year at the mean scaled depth
  nd <- data.frame(year = 2013:2021) %>%
    mutate(depth_sc = mean(dd$depth_sc),
           year_ct = year - 2013,
           time_period = ifelse(year < 2016, "1", "2"),
           time_period = as.factor(time_period))

  # Population-level predictions (fixed effects only) from both models
  m1_preds <- predict(m1, newdata = nd, re_form = NA, re_form_iid = NA)
  m2_preds <- predict(m2, newdata = nd, re_form = NA, re_form_iid = NA)
  nd$pred_m1 <- m1_preds$est
  nd$pred_m2 <- m2_preds$est

  nd2 <- nd %>% pivot_longer(c(pred_m1, pred_m2))

  # Flag whether the relevant slope's CI excludes zero. For the second period
  # in m2 the slope is year_ct + interaction; note that summing the CI bounds
  # is only an approximation to the CI of the sum (covariance is ignored).
  # BUGFIX: the original upper-bound test added `conf.low[5]` to
  # `conf.high[2]`, mixing a lower limit into the upper bound; it now uses
  # the interaction term's conf.high, symmetric with the lower-bound test.
  nd2 <- nd2 %>%
    mutate(sig_slope = "No",
           sig_slope = ifelse(name == "pred_m1" &
                                slope_m1$conf.low > 0 & slope_m1$conf.high > 0,
                              "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m1" &
                                slope_m1$conf.low < 0 & slope_m1$conf.high < 0,
                              "Yes", sig_slope)) %>%
    mutate(sig_slope = ifelse(name == "pred_m2" & year < 2016 &
                                slope_m2$conf.low > 0 & slope_m2$conf.high > 0,
                              "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m2" & year < 2016 &
                                slope_m2$conf.low < 0 & slope_m2$conf.high < 0,
                              "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m2" & year > 2015 &
                                slope_m2$conf.low + int_m2$conf.low > 0 &
                                slope_m2$conf.high + int_m2$conf.high > 0,
                              "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m2" & year > 2015 &
                                slope_m2$conf.low + int_m2$conf.low < 0 &
                                slope_m2$conf.high + int_m2$conf.high < 0,
                              "Yes", sig_slope))

  # ggplot() +
  #   geom_point(data = dd, aes(year, density)) +
  #   geom_line(data = nd2, aes(year, exp(value), color = name, linetype = sig_slope)) +
  #   scale_linetype_manual(values = c(2, 1)) +
  #   coord_cartesian(ylim = c(0, 10))

  nd2 <- nd2 %>% mutate(model = paste("east", i, sep = "_"))
  data_list_pred[[i]] <- nd2

  # Significant interaction, i.e. did the slope change between periods?
  inter <- data.frame(model = paste("east", i, sep = "_")) %>%
    mutate(sig = "No",
           sig = ifelse(int_m2$conf.low > 0 & int_m2$conf.high > 0, "Yes", sig),
           sig = ifelse(int_m2$conf.low < 0 & int_m2$conf.high < 0, "Yes", sig))
  data_list_coef[[i]] <- inter

  # Plot residuals
  # NOTE(review): `m` below is undefined (presumably m1 or m2) -- confirm
  # which model is intended before re-enabling.
  # mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  # qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  # qqline(mcmc_res)
}
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with one unique value and 0% NA
#> mutate: changed 6 values (33%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with one unique value and 0% NA
#> mutate: no changes
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with one unique value and 0% NA
#> mutate: changed 9 values (50%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 6 values (33%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 3 values (17%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with one unique value and 0% NA
#> mutate: changed 6 values (33%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✖ `b_j` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✖ `b_j` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✖ `thetaf` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✖ `ln_phi` gradient > 0.001
#> ℹ See `?run_extra_optimization()`
#> ℹ Or refit with `control = sdmTMBcontrol(newton_loops = 1)`
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 6 values (33%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,822 rows (88%), 546 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 3 values (17%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
# Save predictions
# Combine the per-category results accumulated by the east-coast loop
# (one list element per litter category) into single data frames and export.
dat_pred <- dplyr::bind_rows(data_list_pred)
dat_coef <- dplyr::bind_rows(data_list_coef)
write_csv(dat_pred, "output/yr_cont_east_dat_pred.csv")
# NOTE(review): this file holds the interaction-coefficient table (dat_coef),
# but the name ends in "_dat_pred.csv" — looks like a copy-paste; confirm what
# downstream readers expect before renaming.
write_csv(dat_coef, "output/yr_cont_inter_east_dat_pred.csv")
# Reset the accumulators so the west-coast loop below starts from empty lists.
data_list_pred <- list()
data_list_coef <- list()
# For each litter category on the west coast: fit two spatial Tweedie trend
# models (m1: one linear trend; m2: trend allowed to change after 2015),
# predict annual densities on a fixed grid, flag significant slopes, and
# accumulate results into data_list_pred / data_list_coef for export.
for (i in unique(west$litter_category)) {

  dd <- west %>%
    filter(litter_category == i) %>%
    mutate(time_period = ifelse(year < 2016, "1", "2"),
           time_period = as.factor(time_period),
           year_ct = year - 2013) %>% # center year so the intercept is at 2013
    droplevels()

  # Whole time period: single linear year trend
  m1 <- sdmTMB(
    data = dd,
    formula = density ~ year_ct + depth_sc,
    mesh = west_mesh,
    family = tweedie(),
    spatial = "on",
    spatiotemporal = "off"
  )

  # Interaction: slope may differ between period 1 (<2016) and period 2
  m2 <- sdmTMB(
    data = dd,
    formula = density ~ year_ct*time_period + depth_sc,
    mesh = west_mesh,
    family = tweedie(),
    spatial = "on",
    spatiotemporal = "off"
  )

  # Print convergence / gradient / SE diagnostics for both fits
  sanity(m1)
  sanity(m2)

  temp_m1 <- tidy(m1, conf.int = TRUE)
  temp_m2 <- tidy(m2, conf.int = TRUE)

  # Pull CI rows by term name instead of positional index ([2]/[5]), which
  # silently breaks if the term order in tidy() output ever changes.
  m1_yr  <- filter(temp_m1, term == "year_ct")
  m2_yr  <- filter(temp_m2, term == "year_ct")
  m2_int <- filter(temp_m2, term == "year_ct:time_period2")

  # Prediction grid: one row per survey year, depth held at its mean
  nd <- data.frame(year = 2013:2021) %>%
    mutate(depth_sc = mean(dd$depth_sc),
           year_ct = year - 2013,
           time_period = ifelse(year < 2016, "1", "2"),
           time_period = as.factor(time_period))

  # Fixed-effect-only predictions (random fields excluded via re_form = NA)
  m1_preds <- predict(m1, newdata = nd, re_form = NA, re_form_iid = NA)
  m2_preds <- predict(m2, newdata = nd, re_form = NA, re_form_iid = NA)
  nd$pred_m1 <- m1_preds$est
  nd$pred_m2 <- m2_preds$est

  nd2 <- nd %>% pivot_longer(c(pred_m1, pred_m2))

  # Flag predictions whose underlying slope CI excludes zero.
  # For m2 in period 2 the slope is year_ct + year_ct:time_period2.
  # FIX: the original upper-bound check summed conf.high[2] + conf.low[5]
  # (a low/high mix-up); the combined upper bound must use both conf.high.
  # NOTE(review): summing the two terms' CI bounds ignores their covariance,
  # so this remains approximate — a contrast/delta-method CI would be exact.
  nd2 <- nd2 %>%
    mutate(sig_slope = "No",
           sig_slope = ifelse(name == "pred_m1" & m1_yr$conf.low > 0 & m1_yr$conf.high > 0, "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m1" & m1_yr$conf.low < 0 & m1_yr$conf.high < 0, "Yes", sig_slope)) %>%
    mutate(sig_slope = ifelse(name == "pred_m2" & year < 2016 & m2_yr$conf.low > 0 & m2_yr$conf.high > 0, "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m2" & year < 2016 & m2_yr$conf.low < 0 & m2_yr$conf.high < 0, "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m2" & year > 2015 & m2_yr$conf.low + m2_int$conf.low > 0 &
                                m2_yr$conf.high + m2_int$conf.high > 0, "Yes", sig_slope),
           sig_slope = ifelse(name == "pred_m2" & year > 2015 & m2_yr$conf.low + m2_int$conf.low < 0 &
                                m2_yr$conf.high + m2_int$conf.high < 0, "Yes", sig_slope))

  nd2 <- nd2 %>% mutate(model = paste("west", i, sep = "_"))
  data_list_pred[[i]] <- nd2

  # Significant interaction (i.e. did the slope change between periods)?
  inter <- data.frame(model = paste("west", i, sep = "_")) %>%
    mutate(sig = "No",
           sig = ifelse(m2_int$conf.low > 0 & m2_int$conf.high > 0, "Yes", sig),
           sig = ifelse(m2_int$conf.low < 0 & m2_int$conf.high < 0, "Yes", sig))
  data_list_coef[[i]] <- inter

  # Plot residuals
  # mcmc_res <- residuals(m, type = "mle-mcmc", mcmc_iter = 201, mcmc_warmup = 200)
  # qqnorm(mcmc_res, asp = 1, main = paste("Normal Q-Q Plot", i, sep = " "))
  # qqline(mcmc_res)
}
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 9 values (50%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with one unique value and 0% NA
#> mutate: changed 9 values (50%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 3 values (17%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with one unique value and 0% NA
#> mutate: changed 3 values (17%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 3 values (17%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✖ `ln_tau_O` standard error may be large
#> ℹ `ln_tau_O` is an internal parameter affecting `sigma_O`
#> ℹ `sigma_O` is the spatial standard deviation
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✖ `ln_kappa` standard error may be large
#> ℹ `ln_kappa` is an internal parameter affecting `range`
#> ℹ `range` is the distance at which data are effectively independent
#> ℹ Try simplifying the model, adjusting the mesh, or adding priors
#> ✔ No sigma parameters are < 0.01
#> ✖ `sigma_O` is larger than 100
#> ℹ Consider simplifying the model or adding priors
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with one unique value and 0% NA
#> mutate: no changes
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: changed 3 values (17%) of 'sig_slope' (0 new NA)
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
#> filter: removed 3,913 rows (88%), 559 rows remaining
#> mutate: new variable 'time_period' (factor) with 2 unique values and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> ✔ Non-linear minimizer suggests successful convergence
#> ✔ Hessian matrix is positive definite
#> ✔ No extreme or very small eigenvalues detected
#> ✔ No gradients with respect to fixed effects are >= 0.001
#> ✔ No fixed-effect standard errors are NA
#> ✔ No standard errors look unreasonably large
#> ✔ No sigma parameters are < 0.01
#> ✔ No sigma parameters are > 100
#> ✔ Range parameter doesn't look unreasonably large
#> mutate: new variable 'depth_sc' (double) with one unique value and 0% NA
#> new variable 'year_ct' (double) with 9 unique values and 0% NA
#> new variable 'time_period' (factor) with 2 unique values and 0% NA
#> pivot_longer: reorganized (pred_m1, pred_m2) into (name, value) [was 9x6, now 18x6]
#> mutate: new variable 'sig_slope' (character) with 2 unique values and 0% NA
#> mutate: no changes
#> mutate: new variable 'model' (character) with one unique value and 0% NA
#> filter: removed 4 rows (80%), one row remaining
#> mutate: new variable 'sig' (character) with one unique value and 0% NA
# Save predictions
# Combine the per-category results accumulated by the west-coast loop
# (one list element per litter category) into single data frames and export.
dat_pred <- dplyr::bind_rows(data_list_pred)
dat_coef <- dplyr::bind_rows(data_list_coef)
write_csv(dat_pred, "output/yr_cont_west_dat_pred.csv")
# NOTE(review): this file holds the interaction-coefficient table (dat_coef),
# but the name ends in "_dat_pred.csv" — looks like a copy-paste; confirm what
# downstream readers expect before renaming.
write_csv(dat_coef, "output/yr_cont_inter_west_dat_pred.csv")